A typesafe allocator submitted by Rusty Russell with trivial renames by me.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au> (authored)
Signed-off-by: ian.pratt@cl.cam.ac.uk
goto out;
}
- action = xmalloc(sizeof(irq_guest_action_t));
+ action = xmalloc(irq_guest_action_t);
if ( (desc->action = (struct irqaction *)action) == NULL )
{
DPRINTK("Cannot bind IRQ %d to guest. Out of memory.\n", irq);
unsigned lo, dummy;
if (!mtrr_state.var_ranges) {
- mtrr_state.var_ranges = xmalloc(num_var_ranges * sizeof (struct mtrr_var_range));
+ mtrr_state.var_ranges = xmalloc_array(struct mtrr_var_range,
+ num_var_ranges);
if (!mtrr_state.var_ranges)
return;
}
int i, max;
max = num_var_ranges;
- if ((usage_table = xmalloc(max * sizeof *usage_table))
- == NULL) {
+ if ((usage_table = xmalloc_array(unsigned int, max)) == NULL) {
printk(KERN_ERR "mtrr: could not allocate\n");
return;
}
{
struct mm_struct *m = &p->exec_domain[0]->mm;
- m->shadow_ht = xmalloc(
- shadow_ht_buckets * sizeof(struct shadow_status));
+ m->shadow_ht = xmalloc_array(struct shadow_status, shadow_ht_buckets);
if ( m->shadow_ht == NULL )
goto nomem;
memset(m->shadow_ht, 0, shadow_ht_buckets * sizeof(struct shadow_status));
* At this point, boot CPU has fully initialised the IDT. It is
* now safe to make ourselves a private copy.
*/
- idt_tables[cpu] = xmalloc(IDT_ENTRIES*8);
+ idt_tables[cpu] = xmalloc_array(struct desc_struct, IDT_ENTRIES);
memcpy(idt_tables[cpu], idt_table, IDT_ENTRIES*8);
*(unsigned short *)(&idt_load[0]) = (IDT_ENTRIES*8)-1;
*(unsigned long *)(&idt_load[2]) = (unsigned long)idt_tables[cpu];
if ( unlikely(sz == GET_HEAP_LIMIT(heap)) )
{
int i, limit = (GET_HEAP_LIMIT(heap)+1) << 1;
- struct ac_timer **new_heap = xmalloc(limit*sizeof(struct ac_timer *));
+ struct ac_timer **new_heap = xmalloc_array(struct ac_timer *, limit);
if ( new_heap == NULL ) BUG();
memcpy(new_heap, heap, (limit>>1)*sizeof(struct ac_timer *));
for ( i = 0; i < smp_num_cpus; i++ )
for ( i = 0; i < smp_num_cpus; i++ )
{
- ac_timers[i].heap = xmalloc(
- (DEFAULT_HEAP_LIMIT+1) * sizeof(struct ac_timer *));
+ ac_timers[i].heap = xmalloc_array(struct ac_timer *, DEFAULT_HEAP_LIMIT+1);
if ( ac_timers[i].heap == NULL ) BUG();
SET_HEAP_SIZE(ac_timers[i].heap, 0);
SET_HEAP_LIMIT(ac_timers[i].heap, DEFAULT_HEAP_LIMIT);
if ( op->u.getdomaininfo.ctxt != NULL )
{
- if ( (c = xmalloc(sizeof(*c))) == NULL )
+ if ( (c = xmalloc(full_execution_context_t)) == NULL )
{
ret = -ENOMEM;
put_domain(d);
int rc = 0;
full_execution_context_t *c;
- if ( (c = xmalloc(sizeof(*c))) == NULL )
+ if ( (c = xmalloc(full_execution_context_t)) == NULL )
return -ENOMEM;
if ( test_bit(DF_CONSTRUCTED, &p->d_flags) )
if ( alloc_exec_domain_struct(d, vcpu) == NULL )
return -ENOMEM;
- if ( (c = xmalloc(sizeof(*c))) == NULL )
+ if ( (c = xmalloc(full_execution_context_t)) == NULL )
{
rc = -ENOMEM;
goto out;
else
max = port + EVENT_CHANNELS_SPREAD;
- chn = xmalloc(max * sizeof(event_channel_t));
+ chn = xmalloc_array(event_channel_t, max);
if ( unlikely(chn == NULL) )
return -ENOMEM;
grant_table_t *t;
int i;
- if ( (t = xmalloc(sizeof(*t))) == NULL )
+ if ( (t = xmalloc(grant_table_t)) == NULL )
goto no_mem;
/* Simple stuff. */
spin_lock_init(&t->lock);
/* Active grant table. */
- if ( (t->active = xmalloc(sizeof(active_grant_entry_t) *
- NR_GRANT_ENTRIES)) == NULL )
+ if ( (t->active = xmalloc_array(active_grant_entry_t, NR_GRANT_ENTRIES))
+ == NULL )
goto no_mem;
memset(t->active, 0, sizeof(active_grant_entry_t) * NR_GRANT_ENTRIES);
return;
}
- if ( (pdev = xmalloc(sizeof(phys_dev_t))) == NULL )
+ if ( (pdev = xmalloc(phys_dev_t)) == NULL )
{
INFO("Error allocating pdev structure.\n");
return;
if ( ed->thread.io_bitmap == NULL )
{
- if ( (ed->thread.io_bitmap = xmalloc(IOBMP_BYTES)) == NULL )
+ if ( (ed->thread.io_bitmap = xmalloc_array(u8, IOBMP_BYTES)) == NULL )
{
rc = -ENOMEM;
goto out;
if ( (dev->hdr_type != PCI_HEADER_TYPE_NORMAL) &&
(dev->hdr_type != PCI_HEADER_TYPE_CARDBUS) )
continue;
- pdev = xmalloc(sizeof(phys_dev_t));
+ pdev = xmalloc(phys_dev_t);
pdev->dev = dev;
pdev->flags = ACC_WRITE;
pdev->state = 0;
*/
struct resource * __request_region(struct resource *parent, unsigned long start, unsigned long n, const char *name)
{
- struct resource *res = xmalloc(sizeof(*res));
+ struct resource *res = xmalloc(struct resource);
if (res) {
memset(res, 0, sizeof(*res));
{
ASSERT(p != NULL);
- p->sched_priv = xmem_cache_alloc(dom_info_cache);
+ p->sched_priv = xmalloc(struct at_dom_info);
if ( p->sched_priv == NULL )
return -1;
for ( i = 0; i < NR_CPUS; i++ )
{
- schedule_data[i].sched_priv = xmalloc(sizeof(struct bvt_cpu_info));
+ schedule_data[i].sched_priv = xmalloc(struct bvt_cpu_info);
if ( schedule_data[i].sched_priv == NULL )
{
{
struct pci_bus *b;
- b = xmalloc(sizeof(*b));
+ b = xmalloc(struct pci_bus);
if (b) {
memset(b, 0, sizeof(*b));
INIT_LIST_HEAD(&b->children);
if (l == 0xffffffff || l == 0x00000000 || l == 0x0000ffff || l == 0xffff0000)
return NULL;
- dev = xmalloc(sizeof(*dev));
+ dev = xmalloc(struct pci_dev);
if (!dev)
return NULL;
max = bus->secondary;
/* Create a device template */
- dev0 = xmalloc(sizeof(struct pci_dev));
+ dev0 = xmalloc(struct pci_dev);
if(!dev0) {
panic("Out of memory scanning PCI bus!\n");
}
ln->res->start;
}
if (r_align > align) {
- tmp = xmalloc(sizeof(*tmp));
+ tmp = xmalloc(struct resource_list);
if (!tmp)
panic("pdev_sort_resources(): "
- "xmalloc() failed!\n");
+ "malloc() failed!\n");
tmp->next = ln;
tmp->res = r;
tmp->dev = dev;
#include <xen/mm.h>
#include <xen/cache.h>
+#include <xen/types.h>
/* Flags to pass to xmem_cache_create(). */
/* NB. The first 3 are only valid when built with SLAB_DEBUG_SUPPORT. */
extern void dump_slabinfo();
+/*
+ * Overflow-checked array allocator.
+ *
+ * NOTE: this function must be defined BEFORE the xmalloc()/xmalloc_array()
+ * macros below. A function-like macro of the same name would otherwise be
+ * expanded in both the function's own definition and in the xmalloc() call
+ * in its body, producing garbage like "((size_t size *) ...)".
+ */
+static inline void *xmalloc_array(size_t size, size_t num)
+{
+    /* Check for multiplication overflow before computing the byte count. */
+    if (size && num > UINT_MAX / size)
+        return NULL;
+    return xmalloc(size * num);
+}
+
+/*
+ * Nicely typesafe for you. The macro name inside its own expansion is not
+ * re-expanded (C self-referential macro rule), so these invoke the real
+ * xmalloc()/xmalloc_array() functions defined above/elsewhere.
+ */
+#define xmalloc(type) ((type *)xmalloc(sizeof(type)))
+#define xmalloc_array(type, num) ((type *)xmalloc_array(sizeof(type), (num)))
#endif /* __ARCH_HAS_SLAB_ALLOCATOR */
#endif /* __SLAB_H__ */